/*
 * NOTE(review): this span is a unified-diff hunk, not resolved C -- the
 * leading '+'/'-' characters are patch markers.  Fragment of vlapic
 * initialisation: allocate the virtual-LAPIC register page and map it
 * into the global map.  Enclosing function signature is not visible here.
 */
vlapic->regs_page = alloc_domheap_page(NULL);
if ( vlapic->regs_page == NULL )
{
/* Patch fixes the log text to name the field that actually failed. */
- dprintk(XENLOG_ERR, "malloc vlapic regs error for vcpu %x\n",
+ dprintk(XENLOG_ERR, "malloc vlapic regs_page error for vcpu %x\n",
v->vcpu_id);
return -ENOMEM;
}
vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
/* Patch adds a NULL check: map_domain_page_global() can fail on 32-bit
 * builds when the global map VA space is exhausted. */
+ if ( vlapic->regs == NULL )
+ {
+ dprintk(XENLOG_ERR, "malloc vlapic regs error for vcpu %x\n",
+ v->vcpu_id);
/*
 * NOTE(review): this new error path returns without releasing
 * regs_page allocated above -- looks like a domheap page leak unless a
 * later teardown path (not visible in this hunk) reclaims it.  Confirm,
 * or add free_domheap_page(vlapic->regs_page) before returning.
 */
+ return -ENOMEM;
+ }
+
/* Registers start zeroed; vlapic_reset() then installs architectural
 * reset values. */
memset(vlapic->regs, 0, PAGE_SIZE);
vlapic_reset(vlapic);
/*
 * NOTE(review): diff-hunk fragment (shadow paging, 4-level path).
 * Remap the guest's top-level pagetable: drop any stale global mapping
 * before installing the new one for gmfn.
 */
if ( v->arch.paging.shadow.guest_vtable )
sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
/* Patch documents why no NULL check is needed on this path: */
+ /* PAGING_LEVELS==4 implies 64-bit, which means that
+ * map_domain_page_global can't fail */
}
else
/* Guest uses Xen's own linear L4 mapping; nothing to map. */
v->arch.paging.shadow.guest_vtable = __linear_l4_table;
/*
 * NOTE(review): diff-hunk fragment (shadow paging, 2/3-level path).
 * Same remap pattern as the 4-level case, but here
 * map_domain_page_global CAN fail (32-bit build), hence the patch adds
 * an ASSERT plus a TODO rather than real error handling.
 */
if ( v->arch.paging.shadow.guest_vtable )
sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+ /* Does this really need map_domain_page_global? Handle the
+ * error properly if so. */
+ ASSERT( v->arch.paging.shadow.guest_vtable );
}
else
/* Guest uses Xen's own linear L2 mapping; nothing to map. */
v->arch.paging.shadow.guest_vtable = __linear_l2_table;
/*
 * NOTE(review): diff-hunk fragment -- tail of map_domain_page_global()
 * (x86_32 global map).  The patch converts a fatal ASSERT on VA-space
 * exhaustion into a recoverable NULL return.  Function head (locking,
 * the loop this sits in) is not visible in this hunk.
 */
idx = find_first_zero_bit(inuse, GLOBALMAP_BITS);
va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
- ASSERT(va < FIXADDR_START);
/* Out of global-map virtual address space: signal failure with va == 0
 * instead of crashing the hypervisor. */
+ if ( va >= FIXADDR_START )
+ {
+ va = 0;
+ goto fail;
+ }
}
/* Success: claim the slot and remember where to resume scanning. */
set_bit(idx, inuse);
inuse_cursor = idx + 1;
/* The success path deliberately falls through 'fail:' too -- the label
 * only marks the common unlock point. */
+ fail:
spin_unlock(&globalmap_lock);
- pl2e = virt_to_xen_l2e(va);
- pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
- l1e_write(pl1e, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
/* Only install the PTE when a slot was actually obtained; on failure
 * va is 0 and the caller receives NULL. */
+ if ( likely(va != 0) )
+ {
+ pl2e = virt_to_xen_l2e(va);
+ pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
+ l1e_write(pl1e, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
+ }
return (void *)va;
}